bitkeeper revision 1.1264 (4243449d-JwBVsSinjAWdYveMNhEjQ)
authormafetter@fleming.research <mafetter@fleming.research>
Thu, 24 Mar 2005 22:52:13 +0000 (22:52 +0000)
committermafetter@fleming.research <mafetter@fleming.research>
Thu, 24 Mar 2005 22:52:13 +0000 (22:52 +0000)
Initial attempt at merging shadow code with head of unstable tree.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
26 files changed:
1  2 
.rootkeys
BitKeeper/etc/ignore
tools/libxc/xc.h
tools/libxc/xc_plan9_build.c
tools/misc/Makefile
xen/arch/x86/domain.c
xen/arch/x86/domain_build.c
xen/arch/x86/mm.c
xen/arch/x86/shadow.c
xen/arch/x86/traps.c
xen/arch/x86/vmx.c
xen/arch/x86/vmx_io.c
xen/arch/x86/x86_32/traps.c
xen/common/dom_mem_ops.c
xen/common/keyhandler.c
xen/common/page_alloc.c
xen/common/schedule.c
xen/drivers/char/console.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/mm.h
xen/include/asm-x86/page.h
xen/include/asm-x86/shadow.h
xen/include/asm-x86/x86_32/page.h
xen/include/xen/domain.h
xen/include/xen/perfc_defn.h
xen/include/xen/sched.h

diff --cc .rootkeys
Simple merge
index 6990f1b2aae96ce04143b82c01237d2308e32d1d,1d370131c1a0a77121d8ca789ec28cb79e69e986..0817a4ce7f214867eaa7a177325d9f2c9632abfb
@@@ -50,6 -50,6 +50,7 @@@ install/
  linux-*-xen0/*
  linux-*-xenU/*
  linux-*.patch
++linux-2.6.10-xen-sparse/include/asm-xen/hypervisor.h.smh
  linux-xen-sparse
  mkddbxen
  netbsd-*-tools/*
@@@ -61,14 -61,14 +62,41 @@@ patches/ebtables.dif
  patches/tmp/*
  pristine-*
  tools/*/build/lib*/*.py
++tools/Makefile.smh
  tools/balloon/balloon
++tools/blktap/Makefile.smh
++tools/blktap/blkcow
++tools/blktap/blkcowgnbd
++tools/blktap/blkcowimg
++tools/blktap/blkdump
++tools/blktap/blkgnbd
++tools/blktap/blkimg
++tools/blktap/blockstore.dat
++tools/blktap/blockstored
++tools/blktap/bstest
++tools/blktap/parallax
++tools/blktap/vdi.dot
++tools/blktap/vdi.ps
++tools/blktap/vdi_create
++tools/blktap/vdi_fill
++tools/blktap/vdi_list
++tools/blktap/vdi_snap
++tools/blktap/vdi_snap_list
++tools/blktap/vdi_tree
++tools/blktap/vdi_validate
++tools/blktap/xen/*
  tools/check/.*
++tools/cmdline/*
  tools/cmdline/xen/*
  tools/ioemu/iodev/device-model
  tools/libxc/xen/*
++tools/misc/cpuperf/cpuperf-perfcntr
++tools/misc/cpuperf/cpuperf-xen
  tools/misc/miniterm/miniterm
++tools/misc/xc_shadow
  tools/misc/xen_cpuperf
  tools/misc/xenperf
++tools/tests/test_x86_emulator
  tools/vnet/gc
  tools/vnet/gc*/*
  tools/vnet/vnet-module/*.ko
@@@ -101,6 -101,28 +129,3 @@@ xen/tools/figlet/figle
  xen/xen
  xen/xen-syms
  xen/xen.*
- tools/misc/cpuperf/cpuperf-perfcntr
- tools/misc/cpuperf/cpuperf-xen
- tools/misc/xc_shadow
 -linux-2.6.10-xen-sparse/include/asm-xen/hypervisor.h.smh
 -tools/Makefile.smh
 -tools/blktap/Makefile.smh
 -tools/blktap/blkcow
 -tools/blktap/blkcowgnbd
 -tools/blktap/blkcowimg
 -tools/blktap/blkdump
 -tools/blktap/blkgnbd
 -tools/blktap/blkimg
 -tools/blktap/blockstore.dat
 -tools/blktap/parallax
 -tools/blktap/vdi.dot
 -tools/blktap/vdi.ps
 -tools/blktap/vdi_create
 -tools/blktap/vdi_fill
 -tools/blktap/vdi_list
 -tools/blktap/vdi_snap
 -tools/blktap/vdi_snap_list
 -tools/blktap/vdi_tree
 -tools/blktap/vdi_validate
 -tools/blktap/xen/*
 -tools/cmdline/*
 -tools/tests/test_x86_emulator
 -tools/blktap/blockstored
 -tools/blktap/bstest
Simple merge
index 417cc780327ab6b73885e9169f10c6c97c55746f,c6778d44bc76ce4fc3b15ecaec69b1d83cec038b..c6778d44bc76ce4fc3b15ecaec69b1d83cec038b
mode 100644,100755..100644
index 3b533e5dc42dba61387ab67627b07c84cfef08d3,b815072184a8527bfd7bf714d53e90b065107f47..a1e0165e6a553fac3646158f9e137f65228073cb
@@@ -19,11 -18,11 +18,12 @@@ TARGETS  = xenperf xc_shado
  INSTALL_BIN  = $(TARGETS) xencons
  INSTALL_SBIN = netfix xm xend xensv xenperf
  
- all: $(TARGETS)
+ all: build
+ build: $(TARGETS)
        $(MAKE) -C miniterm
 +      $(MAKE) -C cpuperf
  
- install: all
+ install: build
        [ -d $(DESTDIR)/usr/bin ] || $(INSTALL_DIR) $(DESTDIR)/usr/bin
        [ -d $(DESTDIR)/usr/sbin ] || $(INSTALL_DIR) $(DESTDIR)/usr/sbin
        $(INSTALL_PROG) $(INSTALL_BIN) $(DESTDIR)/usr/bin
Simple merge
Simple merge
index 55af653c0f6ac3f2096b3aa39cb19a3c94f97cf3,ee661bc1861ca0e1769db78ab4347cda53ad1554..1d79c7bb4b4137c04704a7633cb944962f8c884b
@@@ -247,41 -255,31 +248,51 @@@ int map_ldt_shadow_page(unsigned int of
  {
      struct exec_domain *ed = current;
      struct domain *d = ed->domain;
 -    unsigned long l1e;
 +    unsigned long l1e, nl1e, gpfn, gmfn;
 +    unsigned gva = ed->arch.ldt_base + (off << PAGE_SHIFT);
 +    int res;
  
-     if ( unlikely(in_irq()) )
-         BUG();
+ #if defined(__x86_64__)
+     /* If in user mode, switch to kernel mode just to read LDT mapping. */
+     extern void toggle_guest_mode(struct exec_domain *);
+     int user_mode = !(ed->arch.flags & TF_kernel_mode);
+ #define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(ed)
+ #elif defined(__i386__)
+ #define TOGGLE_MODE() ((void)0)
+ #endif
+     BUG_ON(unlikely(in_irq()));
  
 -    __get_user(l1e, (unsigned long *)
 -               &linear_pg_table[l1_linear_offset(ed->arch.ldt_base) + off]);
 +    shadow_sync_va(ed, gva);
++
+     TOGGLE_MODE();
 +    __get_user(l1e, (unsigned long *)&linear_pg_table[l1_linear_offset(gva)]);
+     TOGGLE_MODE();
  
 -    if ( unlikely(!(l1e & _PAGE_PRESENT)) ||
 -         unlikely(!get_page_and_type(
 -             &frame_table[l1_pgentry_to_pfn(mk_l1_pgentry(l1e))],
 -             d, PGT_ldt_page)) )
 +    if ( unlikely(!(l1e & _PAGE_PRESENT)) )
 +        return 0;
 +
 +    gpfn = l1_pgentry_to_pfn(mk_l1_pgentry(l1e));
 +    gmfn = __gpfn_to_mfn(d, gpfn);
 +    if ( unlikely(!VALID_MFN(gmfn)) )
          return 0;
  
-     if ( unlikely(shadow_mode_enabled(d)) )
 -    ed->arch.perdomain_ptes[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
++    res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
++
++    if ( !res && unlikely(shadow_mode_enabled(d)) )
 +    {
 +        shadow_lock(d);
 +        shadow_remove_all_write_access(d, gpfn, gmfn);
-     }
-     res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
-     if ( unlikely(shadow_mode_enabled(d)) )
++        res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
 +        shadow_unlock(d);
++    }
 +
 +    if ( unlikely(!res) )
 +        return 0;
 +
 +    nl1e = (l1e & ~PAGE_MASK) | (gmfn << PAGE_SHIFT) | _PAGE_RW;
 +
 +    ed->arch.perdomain_ptes[off + 16] = mk_l1_pgentry(nl1e);
      ed->arch.shadow_ldt_mapcnt++;
  
      return 1;
@@@ -2000,83 -2044,14 +2104,12 @@@ int do_update_va_mapping(unsigned long 
       * XXX When we make this support 4MB superpages we should also deal with 
       * the case of updating L2 entries.
       */
-     if ( likely(!shadow_mode_enabled(d)) )
-     {
-         if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
-                                     mk_l1_pgentry(val))) )
-             err = -EINVAL;
-     }
-     else
-     {
-         unsigned long l1mfn;
-         if ( unlikely(percpu_info[cpu].foreign &&
-                       (shadow_mode_translate(d) ||
-                        shadow_mode_translate(percpu_info[cpu].foreign))) )
-         {
-             // The foreign domain's pfn's are in a different namespace.
-             // We wouldn't be able to figure out how to (re-)shadow our
-             // gpte without additional context.
-             //
-             domain_crash();
-         }
-     
-         check_pagetable(ed, "pre-va"); /* debug */
-         shadow_lock(d);
-         
-         // This is actually overkill - we don't need to sync the L1 itself,
-         // just everything involved in getting to this L1 (i.e. we need
-         // linear_pg_table[l1_linear_offset(va)] to be in sync)...
-         //
-         __shadow_sync_va(ed, va);
- #if 1 /* keep check_pagetables() happy */
-         /*
-          * However, the above doesn't guarantee that there's no snapshot of
-          * the L1 table in question; it just says that the relevant L2 and L1
-          * entries for VA are in-sync.  There might still be a snapshot.
-          *
-          * The checking code in _check_pagetables() assumes that no one will
-          * mutate the shadow of a page that has a snapshot.  It's actually
-          * OK to not sync this page, but it seems simpler to:
-          * 1) keep all code paths the same, and
-          * 2) maintain the invariant for _check_pagetables(), rather than try
-          *    to teach it about this boundary case.
-          * So we flush this L1 page, if it's out of sync.
-          */
-         l1mfn = (l2_pgentry_val(linear_l2_table(ed)[l2_table_offset(va)]) >>
-                  PAGE_SHIFT);
-         if ( mfn_out_of_sync(l1mfn) )
-         {
-             perfc_incrc(extra_va_update_sync);
-             __shadow_sync_mfn(d, l1mfn);
-         }
- #endif /* keep check_pagetables() happy */
-         if ( unlikely(__put_user(val, &l1_pgentry_val(
-                                      linear_pg_table[l1_linear_offset(va)]))) )
-             err = -EINVAL;
-         else
-         {
-             // also need to update the shadow
-             unsigned long spte;
-             l1pte_propagate_from_guest(d, val, &spte);
-             shadow_set_l1e(va, spte, 0);
-             /*
-              * If we're in log-dirty mode then we need to note that we've updated
-              * the PTE in the PT-holding page. We need the machine frame number
-              * for this.
-              */
-             if ( shadow_mode_log_dirty(d) )
-                 mark_dirty(d, va_to_l1mfn(ed, va));
--
-             shadow_unlock(d);
-             check_pagetable(ed, "post-va"); /* debug */
-         }
-     }
 -    if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
 -                                mk_l1_pgentry(val))) )
++    if ( unlikely(!shadow_mode_enabled(d)) )
++        rc = update_shadow_va_mapping(va, val, ed, d);
++    else if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
++                                     mk_l1_pgentry(val))) )
+         rc = -EINVAL;
  
 -    if ( unlikely(shadow_mode_enabled(d)) )
 -        update_shadow_va_mapping(va, val, ed, d);
 -
      deferred_ops = percpu_info[cpu].deferred_ops;
      percpu_info[cpu].deferred_ops = 0;
  
@@@ -2354,13 -2298,8 +2387,11 @@@ void ptwr_flush(const int which
      int            i, cpu = smp_processor_id();
      struct exec_domain *ed = current;
      struct domain *d = ed->domain;
- #ifdef PERF_COUNTERS
      unsigned int   modified = 0;
- #endif
  
 +    // not supported in combination with various shadow modes!
 +    ASSERT( !shadow_mode_enabled(d) );
 +    
      l1va = ptwr_info[cpu].ptinfo[which].l1va;
      ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
  
              domain_crash();
          }
          
-         if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
-             put_page_from_l1e(ol1e, d);
 -        if ( unlikely(sl1e != NULL) )
 -            l1pte_propagate_from_guest(
 -                d, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
 -
+         put_page_from_l1e(ol1e, d);
      }
      unmap_domain_mem(pl1e);
 -
 +    
      perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
+     ptwr_info[cpu].ptinfo[which].prev_exec_domain = ed;
+     ptwr_info[cpu].ptinfo[which].prev_nr_updates  = modified;
  
      /*
       * STEP 3. Reattach the L1 p.t. page into the current address space.
       */
  
      ptwr_info[cpu].ptinfo[which].l1va = 0;
 -
 -    if ( unlikely(sl1e != NULL) )
 -    {
 -        unmap_domain_mem(sl1e);
 -        put_shadow_status(d);
 -    }
  }
  
+ static int ptwr_emulated_update(
+     unsigned long addr,
+     unsigned long old,
+     unsigned long val,
+     unsigned int bytes,
+     unsigned int do_cmpxchg)
+ {
+     unsigned long sstat, pte, pfn;
+     struct pfn_info *page;
+     l1_pgentry_t ol1e, nl1e, *pl1e, *sl1e;
+     struct domain *d = current->domain;
+     /* Aligned access only, thank you. */
+     if ( !access_ok(VERIFY_WRITE, addr, bytes) || ((addr & (bytes-1)) != 0) )
+     {
+         MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %p)\n",
+                 bytes, addr);
+         return X86EMUL_UNHANDLEABLE;
+     }
+     /* Turn a sub-word access into a full-word access. */
+     if ( (addr & ((BITS_PER_LONG/8)-1)) != 0 )
+     {
+         int           rc;
+         unsigned long full;
+         unsigned int  mask = addr & ((BITS_PER_LONG/8)-1);
+         /* Align address; read full word. */
+         addr &= ~((BITS_PER_LONG/8)-1);
+         if ( (rc = x86_emulate_read_std(addr, &full, BITS_PER_LONG/8)) )
+             return rc;
+         /* Mask out bits provided by caller. */
+         full &= ~((1UL << (bytes*8)) - 1UL) << (mask*8);
+         /* Shift the caller value and OR in the missing bits. */
+         val  &= (1UL << (bytes*8)) - 1UL;
+         val <<= mask*8;
+         val  |= full;
+     }
+     /* Read the PTE that maps the page being updated. */
+     if ( __get_user(pte, (unsigned long *)
+                     &linear_pg_table[l1_linear_offset(addr)]) )
+     {
+         MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table\n");
+         return X86EMUL_UNHANDLEABLE;
+     }
+     pfn  = pte >> PAGE_SHIFT;
+     page = &frame_table[pfn];
+     /* We are looking only for read-only mappings of p.t. pages. */
+     if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
+          ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
+          (page_get_owner(page) != d) )
+     {
+         MEM_LOG("ptwr_emulate: Page is mistyped or bad pte (%p, %x)\n",
+                 pte, page->u.inuse.type_info);
+         return X86EMUL_UNHANDLEABLE;
+     }
+     /* Check the new PTE. */
+     nl1e = mk_l1_pgentry(val);
+     if ( unlikely(!get_page_from_l1e(nl1e, d)) )
+         return X86EMUL_UNHANDLEABLE;
+     /* Checked successfully: do the update (write or cmpxchg). */
+     pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK));
+     if ( do_cmpxchg )
+     {
+         ol1e = mk_l1_pgentry(old);
+         if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
+         {
+             unmap_domain_mem(pl1e);
+             put_page_from_l1e(nl1e, d);
+             return X86EMUL_CMPXCHG_FAILED;
+         }
+     }
+     else
+     {
+         ol1e  = *pl1e;
+         *pl1e = nl1e;
+     }
+     unmap_domain_mem(pl1e);
+     /* Propagate update to shadow cache. */
+     if ( unlikely(shadow_mode_enabled(d)) )
+     {
+         sstat = get_shadow_status(d, page_to_pfn(page));
+         if ( sstat & PSH_shadowed )
+         {
+             sl1e = map_domain_mem(
+                 ((sstat & PSH_pfn_mask) << PAGE_SHIFT) + (addr & ~PAGE_MASK));
+             l1pte_propagate_from_guest(
+                 d, &l1_pgentry_val(nl1e), &l1_pgentry_val(*sl1e));
+             unmap_domain_mem(sl1e);
+         }
+     }
+     /* Finally, drop the old PTE. */
+     put_page_from_l1e(ol1e, d);
+     return X86EMUL_CONTINUE;
+ }
+ static int ptwr_emulated_write(
+     unsigned long addr,
+     unsigned long val,
+     unsigned int bytes)
+ {
+     return ptwr_emulated_update(addr, 0, val, bytes, 0);
+ }
+ static int ptwr_emulated_cmpxchg(
+     unsigned long addr,
+     unsigned long old,
+     unsigned long new,
+     unsigned int bytes)
+ {
+     return ptwr_emulated_update(addr, old, new, bytes, 1);
+ }
+ static struct x86_mem_emulator ptwr_mem_emulator = {
+     .read_std         = x86_emulate_read_std,
+     .write_std        = x86_emulate_write_std,
+     .read_emulated    = x86_emulate_read_std,
+     .write_emulated   = ptwr_emulated_write,
+     .cmpxchg_emulated = ptwr_emulated_cmpxchg
+ };
  /* Write page fault handler: check if guest is trying to modify a PTE. */
  int ptwr_do_page_fault(unsigned long addr)
  {
+     unsigned long       pte, pfn, l2e;
+     struct pfn_info    *page;
+     l2_pgentry_t       *pl2e;
+     int                 which, cpu = smp_processor_id();
+     u32                 l2_idx;
      struct exec_domain *ed = current;
-     unsigned long    pte, pfn, l2e;
-     struct pfn_info *page;
-     l2_pgentry_t    *pl2e;
-     int              which, cpu = smp_processor_id();
-     u32              l2_idx;
  
-     
- #ifdef __x86_64__
-     return 0; /* Writable pagetables need fixing for x86_64. */
- #endif
 +    // not supported in combination with various shadow modes!
 +    ASSERT( !shadow_mode_enabled(ed->domain) );
++
+     /* Can't use linear_l2_table with external tables. */
+     BUG_ON(shadow_mode_external(ed->domain));
  
      /*
       * Attempt to read the PTE that maps the VA being accessed. By checking for
      ptwr_info[cpu].ptinfo[which].l2_idx = l2_idx;
      
      /* For safety, disconnect the L1 p.t. page from current space. */
 -    if ( (which == PTWR_PT_ACTIVE) && 
 -         likely(!shadow_mode_enabled(ed->domain)) )
 +    if ( which == PTWR_PT_ACTIVE )
      {
          *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
- #if 1
          flush_tlb(); /* XXX Multi-CPU guests? */
- #else
-         flush_tlb_all();
- #endif
      }
      
      /* Temporarily map the L1 page, and make a copy of it. */
@@@ -2656,6 -2759,461 +2824,31 @@@ void ptwr_status(void
      page = &frame_table[pfn];
  }
  
 -void audit_domain(struct domain *d)
 -{
 -    int ttot=0, ctot=0, io_mappings=0, lowmem_mappings=0;
 -
 -    void adjust (struct pfn_info *page, int dir, int adjtype)
 -    {
 -        int count = page->count_info & PGC_count_mask;
 -
 -        if ( adjtype )
 -        {
 -            int tcount = page->u.inuse.type_info & PGT_count_mask;
 -            
 -            ttot++;
 -
 -            tcount += dir;
 -
 -            if ( tcount < 0 )
 -            {
 -                /* This will only come out once. */
 -                printk("Audit %d: type count whent below zero pfn=%x "
 -                       "taf=%x otaf=%x\n",
 -                       d->id, page-frame_table,
 -                       page->u.inuse.type_info,
 -                       page->tlbflush_timestamp);
 -            }
 -            
 -            page->u.inuse.type_info =
 -                (page->u.inuse.type_info & ~PGT_count_mask) | 
 -                (tcount & PGT_count_mask);
 -        }
 -
 -        ctot++;
 -        count += dir;
 -        if ( count < 0 )
 -        {
 -            /* This will only come out once. */
 -            printk("Audit %d: general count whent below zero pfn=%x "
 -                   "taf=%x otaf=%x\n",
 -                   d->id, page-frame_table,
 -                   page->u.inuse.type_info,
 -                   page->tlbflush_timestamp);
 -        }
 -            
 -        page->count_info =
 -            (page->count_info & ~PGC_count_mask) | 
 -            (count & PGC_count_mask);            
 -
 -    }
 -
 -    void scan_for_pfn(struct domain *d, unsigned long xpfn)
 -    {
 -        unsigned long pfn, *pt;
 -        struct list_head *list_ent;
 -        struct pfn_info *page;
 -        int i;
 -
 -        list_ent = d->page_list.next;
 -        for ( i = 0; (list_ent != &d->page_list); i++ )
 -        {
 -            pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
 -            page = &frame_table[pfn];
 -            
 -            switch ( page->u.inuse.type_info & PGT_type_mask )
 -            {
 -            case PGT_l1_page_table:
 -            case PGT_l2_page_table:
 -                pt = map_domain_mem(pfn<<PAGE_SHIFT);
 -                for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
 -                    if ( (pt[i] & _PAGE_PRESENT) &&
 -                         ((pt[i] >> PAGE_SHIFT) == xpfn) )
 -                        printk("     found dom=%d i=%x pfn=%lx t=%x c=%x\n",
 -                               d->id, i, pfn, page->u.inuse.type_info,
 -                               page->count_info);
 -                unmap_domain_mem(pt);           
 -            }
 -
 -            list_ent = frame_table[pfn].list.next;
 -        }
 -
 -    }
 -
 -    void scan_for_pfn_remote(unsigned long xpfn)
 -    {
 -        struct domain *e;
 -        for_each_domain ( e )
 -            scan_for_pfn( e, xpfn );            
 -    }   
 -
 -    int i, l1, l2;
 -    unsigned long pfn;
 -    struct list_head *list_ent;
 -    struct pfn_info *page;
 -
 -    if ( d != current->domain )
 -        domain_pause(d);
 -    synchronise_pagetables(~0UL);
 -
 -    printk("pt base=%lx sh_info=%x\n",
 -           pagetable_val(d->exec_domain[0]->arch.guest_table)>>PAGE_SHIFT,
 -           virt_to_page(d->shared_info)-frame_table);
 -           
 -    spin_lock(&d->page_alloc_lock);
 -
 -    audit_pagelist(d);
 -
 -    /* PHASE 0 */
 -
 -    list_ent = d->page_list.next;
 -    for ( i = 0; (list_ent != &d->page_list); i++ )
 -    {
 -        pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;       
 -        page = &frame_table[pfn];
 -
 -        BUG_ON(page_get_owner(page) != d);
 -
 -        if ( (page->u.inuse.type_info & PGT_count_mask) >
 -             (page->count_info & PGC_count_mask) )
 -            printk("taf > caf %x %x pfn=%lx\n",
 -                   page->u.inuse.type_info, page->count_info, pfn );
 - 
 -#if 0   /* SYSV shared memory pages plus writeable files. */
 -        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page && 
 -             (page->u.inuse.type_info & PGT_count_mask) > 1 )
 -        {
 -            printk("writeable page with type count >1: pfn=%lx t=%x c=%x\n",
 -                  pfn,
 -                  page->u.inuse.type_info,
 -                  page->count_info );
 -            scan_for_pfn_remote(pfn);
 -        }
 -#endif
 -        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_none && 
 -             (page->u.inuse.type_info & PGT_count_mask) > 1 )
 -        {
 -            printk("normal page with type count >1: pfn=%lx t=%x c=%x\n",
 -                  pfn,
 -                  page->u.inuse.type_info,
 -                  page->count_info );
 -        }
 -
 -        /* Use tlbflush_timestamp to store original type_info. */
 -        page->tlbflush_timestamp = page->u.inuse.type_info;
 -
 -        list_ent = frame_table[pfn].list.next;
 -    }
 -
 -
 -    /* PHASE 1 */
 -    if ( pagetable_val(d->exec_domain[0]->arch.guest_table) )
 -        adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.guest_table)
 -                           >>PAGE_SHIFT], -1, 1);
 -
 -    list_ent = d->page_list.next;
 -    for ( i = 0; (list_ent != &d->page_list); i++ )
 -    {
 -        unsigned long *pt;
 -        pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;       
 -        page = &frame_table[pfn];
 -
 -        BUG_ON(page_get_owner(page) != d);
 -
 -        switch ( page->u.inuse.type_info & PGT_type_mask )
 -        {
 -        case PGT_l2_page_table:
 -
 -            if ( (page->u.inuse.type_info & PGT_validated) != PGT_validated )
 -                printk("Audit %d: L2 not validated %x\n",
 -                       d->id, page->u.inuse.type_info);
 -
 -            if ( (page->u.inuse.type_info & PGT_pinned) != PGT_pinned )
 -                printk("Audit %d: L2 not pinned %x\n",
 -                       d->id, page->u.inuse.type_info);
 -            else
 -                adjust( page, -1, 1 );
 -           
 -            pt = map_domain_mem( pfn<<PAGE_SHIFT );
 -
 -            for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
 -            {
 -                if ( pt[i] & _PAGE_PRESENT )
 -                {
 -                    unsigned long l1pfn = pt[i]>>PAGE_SHIFT;
 -                    struct pfn_info *l1page = &frame_table[l1pfn];
 -
 -                    if ( page_get_owner(l1page) != d )
 -                    {
 -                        printk("L2: Skip bizarre page belonging to other "
 -                               "dom %p\n", page_get_owner(l1page));
 -                        continue;
 -                    }
 -                    
 -                    if ( (l1page->u.inuse.type_info & PGT_type_mask) ==
 -                         PGT_l2_page_table )
 -                        printk("Audit %d: [%x] Found %s Linear PT "
 -                               "t=%x pfn=%lx\n", d->id, i, 
 -                               (l1pfn==pfn) ? "Self" : "Other",
 -                               l1page->u.inuse.type_info,
 -                               l1pfn);
 -                    else if ( (l1page->u.inuse.type_info & PGT_type_mask) !=
 -                              PGT_l1_page_table )
 -                        printk("Audit %d: [%x] Expected L1 t=%x pfn=%lx\n",
 -                               d->id, i,
 -                               l1page->u.inuse.type_info,
 -                               l1pfn);
 -
 -                    adjust(l1page, -1, 1);
 -                }
 -            }
 -
 -            unmap_domain_mem(pt);
 -
 -            break;
 -
 -
 -        case PGT_l1_page_table:
 -            
 -            if ( (page->u.inuse.type_info & PGT_pinned) == PGT_pinned )
 -                adjust( page, -1, 1 );
 -
 -            if ( (page->u.inuse.type_info & PGT_validated) != PGT_validated )
 -                printk("Audit %d: L1 not validated %x\n",
 -                       d->id, page->u.inuse.type_info);
 -#if 0
 -            if ( (page->u.inuse.type_info & PGT_pinned) != PGT_pinned )
 -                printk("Audit %d: L1 not pinned %x\n",
 -                       d->id, page->u.inuse.type_info);
 -#endif
 -            pt = map_domain_mem( pfn<<PAGE_SHIFT );
 -
 -            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
 -            {
 -                if ( pt[i] & _PAGE_PRESENT )
 -                {
 -                    unsigned long l1pfn = pt[i]>>PAGE_SHIFT;
 -                    struct pfn_info *l1page = &frame_table[l1pfn];
 -
 -                    if ( l1pfn < 0x100 )
 -                    {
 -                        lowmem_mappings++;
 -                        continue;
 -                    }
 -
 -                    if ( l1pfn > max_page )
 -                    {
 -                        io_mappings++;
 -                        continue;
 -                    }
 -
 -                    if ( pt[i] & _PAGE_RW )
 -                    {
 -
 -                        if ( (l1page->u.inuse.type_info & PGT_type_mask) ==
 -                             PGT_l1_page_table ||
 -                             (l1page->u.inuse.type_info & PGT_type_mask) ==
 -                             PGT_l2_page_table )
 -                            printk("Audit %d: [%x] Ilegal RW t=%x pfn=%lx\n",
 -                                   d->id, i,
 -                                   l1page->u.inuse.type_info,
 -                                   l1pfn);
 -
 -                    }
 -
 -                    if ( page_get_owner(l1page) != d )
 -                    {
 -                        printk("Audit %d: [%lx,%x] Skip foreign page dom=%p "
 -                               "pfn=%lx c=%08x t=%08x m2p=%lx\n",
 -                               d->id, pfn, i,
 -                               page_get_owner(l1page),
 -                               l1pfn,
 -                               l1page->count_info,
 -                               l1page->u.inuse.type_info,
 -                               machine_to_phys_mapping[l1pfn]);    
 -                        continue;
 -                    }
 -
 -                    adjust(l1page, -1, 0);
 -                }
 -            }
 -
 -            unmap_domain_mem(pt);
 -
 -            break;
 -        }       
 -
 -        list_ent = frame_table[pfn].list.next;
 -    }
 -
 -    if ( (io_mappings > 0) || (lowmem_mappings > 0) )
 -        printk("Audit %d: Found %d lowmem mappings and %d io mappings\n",
 -               d->id, lowmem_mappings, io_mappings);
 -
 -    /* PHASE 2 */
 -
 -    ctot = ttot = 0;
 -    list_ent = d->page_list.next;
 -    for ( i = 0; (list_ent != &d->page_list); i++ )
 -    {
 -        pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
 -        page = &frame_table[pfn];
 -
 -        switch ( page->u.inuse.type_info & PGT_type_mask)
 -        {
 -        case PGT_l1_page_table:
 -        case PGT_l2_page_table:
 -            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
 -            {
 -                printk("Audit %d: type count!=0 t=%x ot=%x c=%x pfn=%lx\n",
 -                       d->id, page->u.inuse.type_info, 
 -                       page->tlbflush_timestamp,
 -                       page->count_info, pfn );
 -                scan_for_pfn_remote(pfn);
 -            }
 -        default:
 -            if ( (page->count_info & PGC_count_mask) != 1 )
 -            {
 -                printk("Audit %d: gen count!=1 (c=%x) t=%x ot=%x pfn=%lx\n",
 -                       d->id, 
 -                       page->count_info,
 -                       page->u.inuse.type_info, 
 -                       page->tlbflush_timestamp, pfn );
 -                scan_for_pfn_remote(pfn);
 -            }
 -            break;
 -        }
 -
 -        list_ent = frame_table[pfn].list.next;
 -    }
 -
 -    /* PHASE 3 */
 -    list_ent = d->page_list.next;
 -    l1 = l2 = 0;
 -    for ( i = 0; (list_ent != &d->page_list); i++ )
 -    {
 -        unsigned long *pt;
 -        pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
 -        page = &frame_table[pfn];
 -
 -        switch ( page->u.inuse.type_info & PGT_type_mask )
 -        {
 -        case PGT_l2_page_table:
 -          l2++;
 -            if ( (page->u.inuse.type_info & PGT_pinned) == PGT_pinned )
 -                adjust( page, 1, 1 );          
 -
 -            pt = map_domain_mem( pfn<<PAGE_SHIFT );
 -
 -            for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
 -            {
 -                if ( pt[i] & _PAGE_PRESENT )
 -                {
 -                    unsigned long l1pfn = pt[i]>>PAGE_SHIFT;
 -                    struct pfn_info *l1page;
 -
 -                    if (l1pfn>max_page)
 -                        continue;
 -
 -                    l1page = &frame_table[l1pfn];
 -
 -                    if ( page_get_owner(l1page) == d )
 -                        adjust(l1page, 1, 1);
 -                }
 -            }
 -
 -            unmap_domain_mem(pt);
 -            break;
 -
 -        case PGT_l1_page_table:
 -          l1++;
 -            if ( (page->u.inuse.type_info & PGT_pinned) == PGT_pinned )
 -                adjust( page, 1, 1 );
 -
 -            pt = map_domain_mem( pfn<<PAGE_SHIFT );
 -
 -            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
 -            {
 -                if ( pt[i] & _PAGE_PRESENT )
 -                {
 -                    unsigned long l1pfn = pt[i]>>PAGE_SHIFT;
 -                    struct pfn_info *l1page;
 -
 -                    if (l1pfn>max_page)
 -                        continue;
 -
 -                    l1page = &frame_table[l1pfn];
 -
 -                    if ( (page_get_owner(l1page) != d) ||
 -                         (l1pfn < 0x100) || (l1pfn > max_page) )
 -                        continue;
 -
 -                    adjust(l1page, 1, 0);
 -                }
 -            }
 -
 -            unmap_domain_mem(pt);
 -            break;
 -        }
 -
 -
 -        page->tlbflush_timestamp = 0;
 -
 -        list_ent = frame_table[pfn].list.next;
 -    }
 -
 -
 -    if ( pagetable_val(d->exec_domain[0]->arch.guest_table) )
 -        adjust(&frame_table[pagetable_val(
 -            d->exec_domain[0]->arch.guest_table)>>PAGE_SHIFT], 1, 1);
 -
 -    spin_unlock(&d->page_alloc_lock);
 -    printk("Audit %d: Done. ref=%d xenpages=%d pages=%d l1=%d"
 -           " l2=%d ctot=%d ttot=%d\n", 
 -           d->id, atomic_read(&d->refcnt), d->xenheap_pages, d->tot_pages,
 -           l1, l2, ctot, ttot );
 -
 -    if ( d != current->domain )
 -        domain_unpause(d);
 -}
 -
 -void audit_domains(void)
 -{
 -    struct domain *d;
 -    for_each_domain ( d )
 -        audit_domain(d);
 -}
 -
 -void audit_domains_key(unsigned char key)
 -{
 -    audit_domains();
 -}
 -
+ void audit_pagelist(struct domain *d)
+ {
+     struct list_head *list_ent;
+     int xenpages, totpages;
+     list_ent = d->xenpage_list.next;
+     for ( xenpages = 0; (list_ent != &d->xenpage_list); xenpages++ )
+     {
+         list_ent = list_ent->next;
+     }
+     list_ent = d->page_list.next;
+     for ( totpages = 0; (list_ent != &d->page_list); totpages++ )
+     {
+         list_ent = list_ent->next;
+     }
+     if ( xenpages != d->xenheap_pages ||
+          totpages != d->tot_pages )
+     {
+         printk("ARGH! dom %d: xen=%d %d, pages=%d %d\n",
+                xenpages, d->xenheap_pages, 
+                totpages, d->tot_pages );
+     }
+ }
  #endif /* NDEBUG */
  
  /*
index 435afa45b0f3fa52ebed1ef9821a2ed4c333df2f,f9c1b84ff770645c40bc1dde524295c02a677105..171b10ec61eb829f87bdda0af636a56e70b56160
  #include <asm/domain_page.h>
  #include <asm/page.h>
  #include <xen/event.h>
+ #include <xen/sched.h>
  #include <xen/trace.h>
  
 +static void shadow_free_snapshot(struct domain *d,
 +                                 struct out_of_sync_entry *entry);
 +static void remove_out_of_sync_entries(struct domain *d, unsigned long smfn);
 +
  /********
  
  There's a per-domain shadow table spin lock which works fine for SMP
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 46536bae181fa2e28ac5c365d1397cc2f9fead30,4cae1d2a6373f5dd6634dc495146d625b3751d2b..798516a5a2d5279e82fa2436d4e2e2d6be3e8d57
  #include <xen/spinlock.h>
  #include <xen/slab.h>
  #include <xen/irq.h>
+ #include <xen/softirq.h>
  #include <asm/domain_page.h>
+ #include <asm/page.h>
 +#include <asm/shadow.h>
  
  /*
   * Comma-separated list of hexadecimal page numbers containing bad bytes.
Simple merge
Simple merge
Simple merge
index 1c9796df3716edf122b0075fb687585ccb976b96,d2d68e7cc28da8ea0ac6f0f6166f66b01e72a2a7..65d9426c26a18a53a191f96203c92d7cc83dfd53
@@@ -360,24 -310,22 +352,32 @@@ do 
                                       PTWR_CLEANUP_INACTIVE);              \
      } while ( 0 )
  
 +int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
 +
  #ifndef NDEBUG
 -void audit_domain(struct domain *d);
 +
 +#define AUDIT_ALREADY_LOCKED ( 1u << 0 )
 +#define AUDIT_ERRORS_OK      ( 1u << 1 )
 +#define AUDIT_QUIET          ( 1u << 2 )
 +
 +void _audit_domain(struct domain *d, int flags);
 +#define audit_domain(_d) _audit_domain((_d), 0)
  void audit_domains(void);
 +
  #else
 -#define audit_domain(_d) ((void)0)
 -#define audit_domains()  ((void)0)
 -#endif
  
 -void propagate_page_fault(unsigned long addr, u16 error_code);
 +#define _audit_domain(_d, _f) ((void)0)
 +#define audit_domain(_d)      ((void)0)
 +#define audit_domains()       ((void)0)
 +
 +#endif
  
+ /*
+  * Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must 
+  * hold a reference to the page.
+  */
+ int update_grant_va_mapping(unsigned long va,
+                             unsigned long val,
+                             struct domain *d,
+                             struct exec_domain *ed);
  #endif /* __ASM_X86_MM_H__ */
index 660ba4cfd8a8b3ff34a41127e8e98e4ec2f44b79,78e2648b5465b50dfe7a5f44d6e7e5880d9eb399..b498a6025eaf11d672dba5e0d088f35c7aa21e53
@@@ -56,12 -56,23 +56,28 @@@ typedef struct { unsigned long pt_lo; 
  #include <asm/bitops.h>
  #include <asm/flushtlb.h>
  
- #define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
- #define __linear_l2_table ((l2_pgentry_t *)(LINEAR_PT_VIRT_START + \
-      (LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
+ #define linear_l1_table                                                 \
+     ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
 -#define linear_l2_table                                                 \
++#define __linear_l2_table                                                 \
+     ((l2_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
+                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0))))
 -#define linear_l3_table                                                 \
++#define __linear_l3_table                                                 \
+     ((l3_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
+                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) +   \
+                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1))))
 -#define linear_l4_table                                                 \
++#define __linear_l4_table                                                 \
+     ((l4_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
+                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<0)) +   \
+                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<1)) +   \
+                      (LINEAR_PT_VIRT_START >> (PAGETABLE_ORDER<<2))))
++
+ #define linear_pg_table linear_l1_table
 -#define va_to_l1mfn(_va) \
 -    (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
 +#define linear_l2_table(_ed) ((_ed)->arch.guest_vtable)
++#define linear_l3_table(_ed) ((_ed)->arch.guest_vl3table)
++#define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)
 +
- #define va_to_l1mfn(_ed, _va) (l2_pgentry_val(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
++#define va_to_l1mfn(_ed, _va) \
++    (l2_pgentry_val(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
  
  extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
  
index e2f1e4f8b9504d2c4f7f7964e036c4a1bd7785b8,47215107055d0762d7dd5b6d0c37486eb42e55be..625542b26bcf17ee99213ebaa02d9710403b7bff
  #include <xen/config.h>
  #include <xen/types.h>
  #include <xen/perfc.h>
+ #include <xen/sched.h>
  #include <asm/processor.h>
  #include <asm/domain_page.h>
+ #include <public/dom0_ops.h>
  
 -/* Shadow PT flag bits in shadow_status */
 -#define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
 -#define PSH_hl2         (1<<30) /* page is an hl2 */
 -#define PSH_pfn_mask    ((1<<21)-1)
 +/* Shadow PT operation mode : shadow-mode variable in arch_domain. */
  
 -/* Shadow PT operation mode: shadow-mode variable in arch_domain. */
  #define SHM_enable    (1<<0) /* we're in one of the shadow modes */
  #define SHM_log_dirty (1<<1) /* enable log dirty mode */
 -#define SHM_translate (1<<2) /* do p2m translation on guest tables */
 +#define SHM_translate (1<<2) /* do p2m translation on guest tables */
  #define SHM_external  (1<<3) /* external page table, not used by Xen */
  
  #define shadow_mode_enabled(_d)   ((_d)->arch.shadow_mode)
@@@ -175,57 -126,32 +177,63 @@@ static inline void shadow_mode_disable(
          __shadow_mode_disable(d);
  }
  
 -extern unsigned long shadow_l2_table( 
 -    struct domain *d, unsigned long gmfn);
 -  
 -static inline void shadow_invalidate(struct exec_domain *ed) {
 -    if ( !VMX_DOMAIN(ed) )
 -        BUG();
 -    memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
 -}
 +/************************************************************************/
  
 -#define SHADOW_DEBUG 0
 -#define SHADOW_VERBOSE_DEBUG 0
 -#define SHADOW_HASH_DEBUG 0
 +#define __mfn_to_gpfn(_d, mfn)                         \
 +    ( (shadow_mode_translate(_d))                      \
 +      ? machine_to_phys_mapping[(mfn)]                 \
 +      : (mfn) )
  
 -#if SHADOW_DEBUG
 -extern int shadow_status_noswap;
 -#endif
 +#define __gpfn_to_mfn(_d, gpfn)                        \
 +    ( (shadow_mode_translate(_d))                      \
 +      ? ({ ASSERT(current->domain == (_d));            \
 +           phys_to_machine_mapping(gpfn); })           \
 +      : (gpfn) )
 +
++#define __translate_gpfn_to_mfn(_d, gpfn)              \
++    ( (shadow_mode_translate(_d))                      \
++      ? translate_gpfn_to_mfn(_d, gpfn)                \
++      : (gpfn) )
++
++#define translate_gpfn_to_mfn gpfn_to_mfn_safe
++
 +extern unsigned long gpfn_to_mfn_safe(
 +    struct domain *d, unsigned long gpfn);
 +
 +/************************************************************************/
  
  struct shadow_status {
 -    unsigned long pfn;            /* Guest pfn.             */
 -    unsigned long smfn_and_flags; /* Shadow mfn plus flags. */
 -    struct shadow_status *next;   /* Pull-to-front list.    */
 +    struct shadow_status *next;   /* Pull-to-front list per hash bucket. */
 +    unsigned long gpfn_and_flags; /* Guest pfn plus flags. */
 +    unsigned long smfn;           /* Shadow mfn.           */
  };
  
  #define shadow_ht_extra_size 128
  #define shadow_ht_buckets    256
  
 +struct out_of_sync_entry {
 +    struct out_of_sync_entry *next;
 +    unsigned long gpfn;    /* why is this here? */
 +    unsigned long gmfn;
 +    unsigned long snapshot_mfn;
 +    unsigned long writable_pl1e; /* NB: this is a machine address */
 +};
 +
 +#define out_of_sync_extra_size 127
 +
 +#define SHADOW_SNAPSHOT_ELSEWHERE (-1L)
 +
 +/************************************************************************/
 +#define SHADOW_DEBUG 0
 +#define SHADOW_VERBOSE_DEBUG 0
 +#define SHADOW_VVERBOSE_DEBUG 0
 +#define SHADOW_HASH_DEBUG 0
 +#define FULLSHADOW_DEBUG 0
 +
 +#if SHADOW_DEBUG
 +extern int shadow_status_noswap;
 +#endif
 +
  #ifdef VERBOSE
  #define SH_LOG(_f, _a...)                                               \
      printk("DOM%uP%u: SH_LOG(%d): " _f "\n",                            \
Simple merge
Simple merge
index bcb35552069e5e8e4942a1812d847c31824cc581,e5a4e4640f076eedd585760c30f215d4068432f1..2e776841e307a957db43722f788f48f449ad18d9
@@@ -35,21 -19,13 +35,22 @@@ PERFCOUNTER_CPU( num_page_updates, "num
  PERFCOUNTER_CPU( calls_to_update_va, "calls_to_update_va_map" )
  PERFCOUNTER_CPU( page_faults, "page faults" )
  PERFCOUNTER_CPU( copy_user_faults, "copy_user faults" )
 +
 +PERFCOUNTER_CPU(shadow_fault_calls,                "calls to shadow_fault")
 +PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present, "sf bailed due to pde not present")
 +PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
 +PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping,      "sf bailed due to a ro mapping")
 +PERFCOUNTER_CPU(shadow_fault_fixed,                "sf fixed the pgfault")
 +PERFCOUNTER_CPU(write_fault_bail,                  "sf bailed due to write_fault")
 +PERFCOUNTER_CPU(read_fault_bail,                   "sf bailed due to read_fault")
 +
  PERFCOUNTER_CPU( map_domain_mem_count, "map_domain_mem count" )
+ PERFCOUNTER_CPU( ptwr_emulations, "writable pt emulations" )
  
 -PERFCOUNTER_CPU( shadow_l2_table_count, "shadow_l2_table count" )
 -PERFCOUNTER_CPU( shadow_l1_table_count, "shadow_l1_table count" )
 -PERFCOUNTER_CPU( unshadow_table_count, "unshadow_table count" )
 -PERFCOUNTER_CPU( shadow_fixup_count, "shadow_fixup count" )
 +PERFCOUNTER_CPU( shadow_l2_table_count,  "shadow_l2_table count" )
 +PERFCOUNTER_CPU( shadow_l1_table_count,  "shadow_l1_table count" )
 +PERFCOUNTER_CPU( unshadow_table_count,   "unshadow_table count" )
 +PERFCOUNTER_CPU( shadow_fixup_count,     "shadow_fixup count" )
  PERFCOUNTER_CPU( shadow_update_va_fail1, "shadow_update_va_fail1" )
  PERFCOUNTER_CPU( shadow_update_va_fail2, "shadow_update_va_fail2" )
  
Simple merge